diff options
author | Keith Davis <keith.davis@arm.com> | 2022-05-17 10:06:53 +0100 |
---|---|---|
committer | Keith Davis <keith.davis@arm.com> | 2022-05-23 09:59:34 +0100 |
commit | 721e629fa07e65d6a53c093518021e71e48eeac2 (patch) | |
tree | 1f3826741777e5d2cb28be964f46163f49abc271 /src/armnn/test/optimizations | |
parent | 4a09159930f37dffa51c194ea8b565612bbe8431 (diff) | |
download | armnn-721e629fa07e65d6a53c093518021e71e48eeac2.tar.gz |
IVGCVSW-6123 ConstTensorsAsInputs: Conv2d
* Use new INetwork::AddConvolution2dLayer
instead of deprecated version
* Remove duplicated test in SerializerTests
* Fix some cosmetics
Signed-off-by: Keith Davis <keith.davis@arm.com>
Change-Id: I3407815bfdc1cdc01ca0a667b8e4d80d8621783f
Diffstat (limited to 'src/armnn/test/optimizations')
-rw-r--r-- | src/armnn/test/optimizations/FoldPadTests.cpp | 18 | ||||
-rw-r--r-- | src/armnn/test/optimizations/FuseBatchNormTests.cpp | 23 |
2 files changed, 26 insertions, 15 deletions
diff --git a/src/armnn/test/optimizations/FoldPadTests.cpp b/src/armnn/test/optimizations/FoldPadTests.cpp index 027b10377d..14c211f9bf 100644 --- a/src/armnn/test/optimizations/FoldPadTests.cpp +++ b/src/armnn/test/optimizations/FoldPadTests.cpp @@ -636,13 +636,9 @@ TEST_CASE("FoldPadLayerIntoConv2dLayer_ExecuteInferenceWithAndWithoutOptimizatio std::vector<float> biasVector = {5, 6, 7, 8}; TensorInfo biasInfo({4}, DataType::Float32, 0.0f, 0, true); ConstTensor bias(biasInfo, biasVector); - Optional<ConstTensor> optionalBias = Optional<ConstTensor>(bias); - ARMNN_NO_DEPRECATE_WARN_BEGIN - IConnectableLayer* conv2dLayer = network->AddConvolution2dLayer(convDescriptor, - weights, - optionalBias, - "Conv2D"); - ARMNN_NO_DEPRECATE_WARN_END + + IConnectableLayer* conv2dLayer = network->AddConvolution2dLayer(convDescriptor, "Conv2D"); + TensorInfo outputInfo(4, outputShape, DataType::Float32); conv2dLayer->GetOutputSlot(0).SetTensorInfo(outputInfo); @@ -653,6 +649,14 @@ TEST_CASE("FoldPadLayerIntoConv2dLayer_ExecuteInferenceWithAndWithoutOptimizatio padLayer->GetOutputSlot(0).Connect(conv2dLayer->GetInputSlot(0)); conv2dLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0)); + auto weightsLayer = network->AddConstantLayer(weights, "Weights"); + weightsLayer->GetOutputSlot(0).SetTensorInfo(weights.GetInfo()); + weightsLayer->GetOutputSlot(0).Connect(conv2dLayer->GetInputSlot(1)); + + auto biasLayer = network->AddConstantLayer(bias, "Bias"); + biasLayer->GetOutputSlot(0).SetTensorInfo(bias.GetInfo()); + biasLayer->GetOutputSlot(0).Connect(conv2dLayer->GetInputSlot(2)); + // Create ArmNN runtime IRuntimePtr run = IRuntime::Create(IRuntime::CreationOptions()); // default options // Optimise the network diff --git a/src/armnn/test/optimizations/FuseBatchNormTests.cpp b/src/armnn/test/optimizations/FuseBatchNormTests.cpp index 4a94f7889b..54cbbce89f 100644 --- a/src/armnn/test/optimizations/FuseBatchNormTests.cpp +++ 
b/src/armnn/test/optimizations/FuseBatchNormTests.cpp @@ -31,9 +31,10 @@ public: const Optional<ConstTensor> &biases, const char *name) { - ARMNN_NO_DEPRECATE_WARN_BEGIN - return network->AddConvolution2dLayer(descriptor, weights, biases, name); - ARMNN_NO_DEPRECATE_WARN_END + IgnoreUnused(weights); + IgnoreUnused(biases); + + return network->AddConvolution2dLayer(descriptor, name); } static std::vector<IConnectableLayer*> AddConstantLayers(INetwork *network, @@ -41,12 +42,18 @@ public: const ConstTensor &weights, const Optional<ConstTensor> &biases) { - IgnoreUnused(network); - IgnoreUnused(descriptor); - IgnoreUnused(weights); - IgnoreUnused(biases); + auto weightsLayer = network->AddConstantLayer(weights, "Weights"); + weightsLayer->GetOutputSlot(0).SetTensorInfo(weights.GetInfo()); + std::vector<IConnectableLayer*> layers = {weightsLayer}; - return {}; + if (descriptor.m_BiasEnabled) + { + auto biasLayer = network->AddConstantLayer(biases.value(), "Bias"); + biasLayer->GetOutputSlot(0).SetTensorInfo(biases.value().GetInfo()); + layers.emplace_back(biasLayer); + } + + return layers; } }; |