diff options
author | Cathal Corbett <cathal.corbett@arm.com> | 2022-04-14 17:55:11 +0100 |
---|---|---|
committer | Cathal Corbett <cathal.corbett@arm.com> | 2022-05-05 16:10:06 +0000 |
commit | 0690265d83e5aa79bd174544a7b35330781619dd (patch) | |
tree | 2cb825017ee202ebcfa9c8428271a4dccaed72a4 /src/armnn/test/optimizations/FoldPadTests.cpp | |
parent | 3a3a6bfaedc64fac3644c8fe88dbfc3947e2b3ab (diff) | |
download | armnn-0690265d83e5aa79bd174544a7b35330781619dd.tar.gz |
IVGCVSW-6127 ConstTensorsAsInput: DepthwiseConvolution2d
!android-nn-driver:7418
* Update Front-end and Tools.
* Updated Serializer, Deserializer and unit tests to reflect this.
* Updated TfLiteDelegate, TfLiteParser and OnnxParser.
* Change NNDriver to new API.
* Updated Ref.
* Neon and Cl backend partially completed (Backend.cpp files).
* Added dynamic or constant input EndToEnd tests.
* Added ConstantTensorAsInputMemberVariableRedirect Optimization.
Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: Ib18b6c10a093042e165e25237dc04a4c67ba82da
Diffstat (limited to 'src/armnn/test/optimizations/FoldPadTests.cpp')
-rw-r--r-- | src/armnn/test/optimizations/FoldPadTests.cpp | 19 |
1 files changed, 15 insertions, 4 deletions
diff --git a/src/armnn/test/optimizations/FoldPadTests.cpp b/src/armnn/test/optimizations/FoldPadTests.cpp index 2f9e1c6d31..9919c6d0e6 100644 --- a/src/armnn/test/optimizations/FoldPadTests.cpp +++ b/src/armnn/test/optimizations/FoldPadTests.cpp @@ -126,14 +126,18 @@ TEST_CASE("FoldPadLayerIntoDepthwiseConvolution2dLayer") auto* depthwiseConv2dLayer = graph.AddLayer<DepthwiseConvolution2dLayer>(depthwiseConvolution2dDescriptor, "depthwiseConv2d"); - depthwiseConv2dLayer->m_Weight = std::make_unique<ScopedTensorHandle>(weights); + auto* weightsLayer = graph.AddLayer<ConstantLayer>("weights"); + + weightsLayer->GetOutputSlot().SetTensorInfo(weights.GetInfo()); depthwiseConv2dLayer->GetOutputSlot().SetTensorInfo(outputInfo); + depthwiseConv2dLayer->m_Weight = std::make_shared<ScopedTensorHandle>(weights); Layer* output = graph.AddLayer<OutputLayer>(0, "output"); // Connect up layers - input -> pad -> depthwiseConv2d -> output input->GetOutputSlot().Connect(padLayer->GetInputSlot(0)); padLayer->GetOutputSlot().Connect(depthwiseConv2dLayer->GetInputSlot(0)); + weightsLayer->GetOutputSlot().Connect(depthwiseConv2dLayer->GetInputSlot(1)); depthwiseConv2dLayer->GetOutputSlot().Connect(output->GetInputSlot(0)); auto checkSimpleDepthwiseConv2d = [](const Layer* const layer)->bool { @@ -151,6 +155,7 @@ TEST_CASE("FoldPadLayerIntoDepthwiseConvolution2dLayer") &IsLayerOfType<InputLayer>, &IsLayerOfType<PadLayer>, checkSimpleDepthwiseConv2d, + &IsLayerOfType<ConstantLayer>, &IsLayerOfType<OutputLayer>)); armnn::Optimizer::Pass(graph, MakeOptimizations(FoldPadIntoDepthwiseConvolution2d())); @@ -170,6 +175,7 @@ TEST_CASE("FoldPadLayerIntoDepthwiseConvolution2dLayer") CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, checkPadFoldedIntoDepthwiseConv2d, + &IsLayerOfType<ConstantLayer>, &IsLayerOfType<OutputLayer>)); } @@ -741,11 +747,8 @@ TEST_CASE("FoldPadLayerIntoDepthwiseConv2dLayer_ExecuteInferenceWithAndWithoutOp std::vector<float> biasVector = {5, 
6, 7, 8, 9, 10, 11, 12, 5, 6, 7, 8}; TensorInfo biasInfo({12}, DataType::Float32, 0.0f, 0, true); ConstTensor bias(biasInfo, biasVector); - Optional<ConstTensor> optionalBias = Optional<ConstTensor>(bias); IConnectableLayer* conv2dLayer = network->AddDepthwiseConvolution2dLayer(convDescriptor, - weights, - optionalBias, "DepthwiseConv2D"); TensorInfo outputInfo(4, outputShape, DataType::Float32); @@ -758,6 +761,14 @@ TEST_CASE("FoldPadLayerIntoDepthwiseConv2dLayer_ExecuteInferenceWithAndWithoutOp padLayer->GetOutputSlot(0).Connect(conv2dLayer->GetInputSlot(0)); conv2dLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0)); + auto weightsLayer = network->AddConstantLayer(weights, "Weights"); + weightsLayer->GetOutputSlot(0).SetTensorInfo(weights.GetInfo()); + weightsLayer->GetOutputSlot(0).Connect(conv2dLayer->GetInputSlot(1)); + + auto biasLayer = network->AddConstantLayer(bias, "Bias"); + biasLayer->GetOutputSlot(0).SetTensorInfo(bias.GetInfo()); + biasLayer->GetOutputSlot(0).Connect(conv2dLayer->GetInputSlot(2)); + // Create ArmNN runtime IRuntimePtr run = IRuntime::Create(IRuntime::CreationOptions()); // default options // Optimise the network |