diff options
author | Keith Davis <keith.davis@arm.com> | 2022-05-17 10:06:53 +0100 |
---|---|---|
committer | Keith Davis <keith.davis@arm.com> | 2022-05-23 09:59:34 +0100 |
commit | 721e629fa07e65d6a53c093518021e71e48eeac2 (patch) | |
tree | 1f3826741777e5d2cb28be964f46163f49abc271 /src/armnn | |
parent | 4a09159930f37dffa51c194ea8b565612bbe8431 (diff) | |
download | armnn-721e629fa07e65d6a53c093518021e71e48eeac2.tar.gz |
IVGCVSW-6123 ConstTensorsAsInputs: Conv2d
* Use new INetwork::AddConvolution2dLayer
instead of deprecated version
* Remove duplicated test in SerializerTests
* Fix some cosmetics
Signed-off-by: Keith Davis <keith.davis@arm.com>
Change-Id: I3407815bfdc1cdc01ca0a667b8e4d80d8621783f
Diffstat (limited to 'src/armnn')
-rw-r--r-- | src/armnn/Network.cpp | 6 | ||||
-rw-r--r-- | src/armnn/test/NetworkTests.cpp | 11 | ||||
-rw-r--r-- | src/armnn/test/OptimizerTests.cpp | 8 | ||||
-rw-r--r-- | src/armnn/test/SubgraphViewTests.cpp | 4 | ||||
-rw-r--r-- | src/armnn/test/optimizations/FoldPadTests.cpp | 18 | ||||
-rw-r--r-- | src/armnn/test/optimizations/FuseBatchNormTests.cpp | 23 |
6 files changed, 36 insertions, 34 deletions
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp index fecc766836..f2ba94f597 100644 --- a/src/armnn/Network.cpp +++ b/src/armnn/Network.cpp @@ -90,9 +90,9 @@ IConnectableLayer* INetwork::AddConvolution2dLayer(const Convolution2dDescriptor ARMNN_NO_DEPRECATE_WARN_BEGIN IConnectableLayer* INetwork::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor, - const ConstTensor& weights, - const Optional<ConstTensor>& biases, - const char* name) + const ConstTensor& weights, + const Optional<ConstTensor>& biases, + const char* name) { return pNetworkImpl->AddConvolution2dLayer(convolution2dDescriptor, weights, diff --git a/src/armnn/test/NetworkTests.cpp b/src/armnn/test/NetworkTests.cpp index 7756f40623..9d9810408e 100644 --- a/src/armnn/test/NetworkTests.cpp +++ b/src/armnn/test/NetworkTests.cpp @@ -605,10 +605,6 @@ TEST_CASE("ObtainConv2DDescriptorFromIConnectableLayer") { armnn::NetworkImpl net; - unsigned int dims[] = { 10,1,1,1 }; - std::vector<float> convWeightsData(10); - armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true), convWeightsData); - armnn::Convolution2dDescriptor convDesc2d; convDesc2d.m_PadLeft = 2; convDesc2d.m_PadRight = 3; @@ -620,12 +616,7 @@ TEST_CASE("ObtainConv2DDescriptorFromIConnectableLayer") convDesc2d.m_DilationY = 3; convDesc2d.m_BiasEnabled = false; convDesc2d.m_DataLayout = armnn::DataLayout::NCHW; - ARMNN_NO_DEPRECATE_WARN_BEGIN - armnn::IConnectableLayer* const convLayer = net.AddConvolution2dLayer(convDesc2d, - weights, - armnn::EmptyOptional(), - "conv layer"); - ARMNN_NO_DEPRECATE_WARN_END + armnn::IConnectableLayer* const convLayer = net.AddConvolution2dLayer(convDesc2d, "conv layer"); CHECK(convLayer); const armnn::BaseDescriptor& descriptor = convLayer->GetParameters(); diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp index 3dd55279c6..b78863dddc 100644 --- a/src/armnn/test/OptimizerTests.cpp +++ 
b/src/armnn/test/OptimizerTests.cpp @@ -885,11 +885,11 @@ TEST_CASE("OptimizeForExclusiveConnectionsFuseTest") // Define the network Graph graph; - auto input = graph.AddLayer<InputLayer>(0, "input"); + auto input = graph.AddLayer<InputLayer>(0, "input"); auto weightsLayer = graph.AddLayer<ConstantLayer>("Weights"); - auto conv = graph.AddLayer<Convolution2dLayer>(convolution2dDescriptor, "convolution"); - auto batchNorm = graph.AddLayer<BatchNormalizationLayer>(batchNormDescriptor, "batchNorm"); - auto output = graph.AddLayer<OutputLayer>(0, "output"); + auto conv = graph.AddLayer<Convolution2dLayer>(convolution2dDescriptor, "convolution"); + auto batchNorm = graph.AddLayer<BatchNormalizationLayer>(batchNormDescriptor, "batchNorm"); + auto output = graph.AddLayer<OutputLayer>(0, "output"); // Set layer information input->GetOutputSlot().SetTensorInfo(inputInfo); diff --git a/src/armnn/test/SubgraphViewTests.cpp b/src/armnn/test/SubgraphViewTests.cpp index d7465c8361..feeea5d478 100644 --- a/src/armnn/test/SubgraphViewTests.cpp +++ b/src/armnn/test/SubgraphViewTests.cpp @@ -978,8 +978,8 @@ TEST_CASE("MultipleSimpleSubgraphs") "m3"); auto x2 = graph.InsertNewLayer<Convolution2dLayer>(m3->GetInputSlot(0), - Convolution2dDescriptor{}, - "x2"); + Convolution2dDescriptor{}, + "x2"); auto w2 = graph.InsertNewLayer<ConstantLayer>(x2->GetInputSlot(1), "w2"); diff --git a/src/armnn/test/optimizations/FoldPadTests.cpp b/src/armnn/test/optimizations/FoldPadTests.cpp index 027b10377d..14c211f9bf 100644 --- a/src/armnn/test/optimizations/FoldPadTests.cpp +++ b/src/armnn/test/optimizations/FoldPadTests.cpp @@ -636,13 +636,9 @@ TEST_CASE("FoldPadLayerIntoConv2dLayer_ExecuteInferenceWithAndWithoutOptimizatio std::vector<float> biasVector = {5, 6, 7, 8}; TensorInfo biasInfo({4}, DataType::Float32, 0.0f, 0, true); ConstTensor bias(biasInfo, biasVector); - Optional<ConstTensor> optionalBias = Optional<ConstTensor>(bias); - ARMNN_NO_DEPRECATE_WARN_BEGIN - IConnectableLayer* 
conv2dLayer = network->AddConvolution2dLayer(convDescriptor, - weights, - optionalBias, - "Conv2D"); - ARMNN_NO_DEPRECATE_WARN_END + + IConnectableLayer* conv2dLayer = network->AddConvolution2dLayer(convDescriptor, "Conv2D"); + TensorInfo outputInfo(4, outputShape, DataType::Float32); conv2dLayer->GetOutputSlot(0).SetTensorInfo(outputInfo); @@ -653,6 +649,14 @@ TEST_CASE("FoldPadLayerIntoConv2dLayer_ExecuteInferenceWithAndWithoutOptimizatio padLayer->GetOutputSlot(0).Connect(conv2dLayer->GetInputSlot(0)); conv2dLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0)); + auto weightsLayer = network->AddConstantLayer(weights, "Weights"); + weightsLayer->GetOutputSlot(0).SetTensorInfo(weights.GetInfo()); + weightsLayer->GetOutputSlot(0).Connect(conv2dLayer->GetInputSlot(1)); + + auto biasLayer = network->AddConstantLayer(bias, "Bias"); + biasLayer->GetOutputSlot(0).SetTensorInfo(bias.GetInfo()); + biasLayer->GetOutputSlot(0).Connect(conv2dLayer->GetInputSlot(2)); + // Create ArmNN runtime IRuntimePtr run = IRuntime::Create(IRuntime::CreationOptions()); // default options // Optimise the network diff --git a/src/armnn/test/optimizations/FuseBatchNormTests.cpp b/src/armnn/test/optimizations/FuseBatchNormTests.cpp index 4a94f7889b..54cbbce89f 100644 --- a/src/armnn/test/optimizations/FuseBatchNormTests.cpp +++ b/src/armnn/test/optimizations/FuseBatchNormTests.cpp @@ -31,9 +31,10 @@ public: const Optional<ConstTensor> &biases, const char *name) { - ARMNN_NO_DEPRECATE_WARN_BEGIN - return network->AddConvolution2dLayer(descriptor, weights, biases, name); - ARMNN_NO_DEPRECATE_WARN_END + IgnoreUnused(weights); + IgnoreUnused(biases); + + return network->AddConvolution2dLayer(descriptor, name); } static std::vector<IConnectableLayer*> AddConstantLayers(INetwork *network, @@ -41,12 +42,18 @@ public: const ConstTensor &weights, const Optional<ConstTensor> &biases) { - IgnoreUnused(network); - IgnoreUnused(descriptor); - IgnoreUnused(weights); - IgnoreUnused(biases); + auto 
weightsLayer = network->AddConstantLayer(weights, "Weights"); + weightsLayer->GetOutputSlot(0).SetTensorInfo(weights.GetInfo()); + std::vector<IConnectableLayer*> layers = {weightsLayer}; - return {}; + if (descriptor.m_BiasEnabled) + { + auto biasLayer = network->AddConstantLayer(biases.value(), "Bias"); + biasLayer->GetOutputSlot(0).SetTensorInfo(biases.value().GetInfo()); + layers.emplace_back(biasLayer); + } + + return layers; } }; |