From 721e629fa07e65d6a53c093518021e71e48eeac2 Mon Sep 17 00:00:00 2001 From: Keith Davis Date: Tue, 17 May 2022 10:06:53 +0100 Subject: IVGCVSW-6123 ConstTensorsAsInputs: Conv2d * Use new INetwork::AddConvolution2dLayer instead of deprecated version * Remove duplicated test in SerializerTests * Fix some cosmetics Signed-off-by: Keith Davis Change-Id: I3407815bfdc1cdc01ca0a667b8e4d80d8621783f --- src/armnn/test/NetworkTests.cpp | 11 +---------- src/armnn/test/OptimizerTests.cpp | 8 ++++---- src/armnn/test/SubgraphViewTests.cpp | 4 ++-- src/armnn/test/optimizations/FoldPadTests.cpp | 18 ++++++++++------- .../test/optimizations/FuseBatchNormTests.cpp | 23 ++++++++++++++-------- 5 files changed, 33 insertions(+), 31 deletions(-) (limited to 'src/armnn/test') diff --git a/src/armnn/test/NetworkTests.cpp b/src/armnn/test/NetworkTests.cpp index 7756f40623..9d9810408e 100644 --- a/src/armnn/test/NetworkTests.cpp +++ b/src/armnn/test/NetworkTests.cpp @@ -605,10 +605,6 @@ TEST_CASE("ObtainConv2DDescriptorFromIConnectableLayer") { armnn::NetworkImpl net; - unsigned int dims[] = { 10,1,1,1 }; - std::vector convWeightsData(10); - armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true), convWeightsData); - armnn::Convolution2dDescriptor convDesc2d; convDesc2d.m_PadLeft = 2; convDesc2d.m_PadRight = 3; @@ -620,12 +616,7 @@ TEST_CASE("ObtainConv2DDescriptorFromIConnectableLayer") convDesc2d.m_DilationY = 3; convDesc2d.m_BiasEnabled = false; convDesc2d.m_DataLayout = armnn::DataLayout::NCHW; - ARMNN_NO_DEPRECATE_WARN_BEGIN - armnn::IConnectableLayer* const convLayer = net.AddConvolution2dLayer(convDesc2d, - weights, - armnn::EmptyOptional(), - "conv layer"); - ARMNN_NO_DEPRECATE_WARN_END + armnn::IConnectableLayer* const convLayer = net.AddConvolution2dLayer(convDesc2d, "conv layer"); CHECK(convLayer); const armnn::BaseDescriptor& descriptor = convLayer->GetParameters(); diff --git a/src/armnn/test/OptimizerTests.cpp
b/src/armnn/test/OptimizerTests.cpp index 3dd55279c6..b78863dddc 100644 --- a/src/armnn/test/OptimizerTests.cpp +++ b/src/armnn/test/OptimizerTests.cpp @@ -885,11 +885,11 @@ TEST_CASE("OptimizeForExclusiveConnectionsFuseTest") // Define the network Graph graph; - auto input = graph.AddLayer(0, "input"); + auto input = graph.AddLayer(0, "input"); auto weightsLayer = graph.AddLayer("Weights"); - auto conv = graph.AddLayer(convolution2dDescriptor, "convolution"); - auto batchNorm = graph.AddLayer(batchNormDescriptor, "batchNorm"); - auto output = graph.AddLayer(0, "output"); + auto conv = graph.AddLayer(convolution2dDescriptor, "convolution"); + auto batchNorm = graph.AddLayer(batchNormDescriptor, "batchNorm"); + auto output = graph.AddLayer(0, "output"); // Set layer information input->GetOutputSlot().SetTensorInfo(inputInfo); diff --git a/src/armnn/test/SubgraphViewTests.cpp b/src/armnn/test/SubgraphViewTests.cpp index d7465c8361..feeea5d478 100644 --- a/src/armnn/test/SubgraphViewTests.cpp +++ b/src/armnn/test/SubgraphViewTests.cpp @@ -978,8 +978,8 @@ TEST_CASE("MultipleSimpleSubgraphs") "m3"); auto x2 = graph.InsertNewLayer(m3->GetInputSlot(0), - Convolution2dDescriptor{}, - "x2"); + Convolution2dDescriptor{}, + "x2"); auto w2 = graph.InsertNewLayer(x2->GetInputSlot(1), "w2"); diff --git a/src/armnn/test/optimizations/FoldPadTests.cpp b/src/armnn/test/optimizations/FoldPadTests.cpp index 027b10377d..14c211f9bf 100644 --- a/src/armnn/test/optimizations/FoldPadTests.cpp +++ b/src/armnn/test/optimizations/FoldPadTests.cpp @@ -636,13 +636,9 @@ TEST_CASE("FoldPadLayerIntoConv2dLayer_ExecuteInferenceWithAndWithoutOptimizatio std::vector biasVector = {5, 6, 7, 8}; TensorInfo biasInfo({4}, DataType::Float32, 0.0f, 0, true); ConstTensor bias(biasInfo, biasVector); - Optional optionalBias = Optional(bias); - ARMNN_NO_DEPRECATE_WARN_BEGIN - IConnectableLayer* conv2dLayer = network->AddConvolution2dLayer(convDescriptor, - weights, - optionalBias, - "Conv2D"); - 
ARMNN_NO_DEPRECATE_WARN_END + + IConnectableLayer* conv2dLayer = network->AddConvolution2dLayer(convDescriptor, "Conv2D"); + TensorInfo outputInfo(4, outputShape, DataType::Float32); conv2dLayer->GetOutputSlot(0).SetTensorInfo(outputInfo); @@ -653,6 +649,14 @@ TEST_CASE("FoldPadLayerIntoConv2dLayer_ExecuteInferenceWithAndWithoutOptimizatio padLayer->GetOutputSlot(0).Connect(conv2dLayer->GetInputSlot(0)); conv2dLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0)); + auto weightsLayer = network->AddConstantLayer(weights, "Weights"); + weightsLayer->GetOutputSlot(0).SetTensorInfo(weights.GetInfo()); + weightsLayer->GetOutputSlot(0).Connect(conv2dLayer->GetInputSlot(1)); + + auto biasLayer = network->AddConstantLayer(bias, "Bias"); + biasLayer->GetOutputSlot(0).SetTensorInfo(bias.GetInfo()); + biasLayer->GetOutputSlot(0).Connect(conv2dLayer->GetInputSlot(2)); + // Create ArmNN runtime IRuntimePtr run = IRuntime::Create(IRuntime::CreationOptions()); // default options // Optimise the network diff --git a/src/armnn/test/optimizations/FuseBatchNormTests.cpp b/src/armnn/test/optimizations/FuseBatchNormTests.cpp index 4a94f7889b..54cbbce89f 100644 --- a/src/armnn/test/optimizations/FuseBatchNormTests.cpp +++ b/src/armnn/test/optimizations/FuseBatchNormTests.cpp @@ -31,9 +31,10 @@ public: const Optional &biases, const char *name) { - ARMNN_NO_DEPRECATE_WARN_BEGIN - return network->AddConvolution2dLayer(descriptor, weights, biases, name); - ARMNN_NO_DEPRECATE_WARN_END + IgnoreUnused(weights); + IgnoreUnused(biases); + + return network->AddConvolution2dLayer(descriptor, name); } static std::vector AddConstantLayers(INetwork *network, @@ -41,12 +42,18 @@ public: const ConstTensor &weights, const Optional &biases) { - IgnoreUnused(network); - IgnoreUnused(descriptor); - IgnoreUnused(weights); - IgnoreUnused(biases); + auto weightsLayer = network->AddConstantLayer(weights, "Weights"); + weightsLayer->GetOutputSlot(0).SetTensorInfo(weights.GetInfo()); + std::vector layers 
= {weightsLayer}; - return {}; + if (descriptor.m_BiasEnabled) + { + auto biasLayer = network->AddConstantLayer(biases.value(), "Bias"); + biasLayer->GetOutputSlot(0).SetTensorInfo(biases.value().GetInfo()); + layers.emplace_back(biasLayer); + } + + return layers; } }; -- cgit v1.2.1