diff options
author | Cathal Corbett <cathal.corbett@arm.com> | 2022-04-14 17:55:11 +0100 |
---|---|---|
committer | Cathal Corbett <cathal.corbett@arm.com> | 2022-05-05 16:10:06 +0000 |
commit | 0690265d83e5aa79bd174544a7b35330781619dd (patch) | |
tree | 2cb825017ee202ebcfa9c8428271a4dccaed72a4 /src/armnn/optimizations/FoldPadIntoLayer2d.hpp | |
parent | 3a3a6bfaedc64fac3644c8fe88dbfc3947e2b3ab (diff) | |
download | armnn-0690265d83e5aa79bd174544a7b35330781619dd.tar.gz |
IVGCVSW-6127 ConstTensorsAsInput: DepthwiseConvolution2d
!android-nn-driver:7418
* Update Front-end and Tools.
* Updated Serializer, Deserializer and unit tests to reflect this.
* Updated TfLiteDelegate, TfLiteParser and OnnxParser.
* Change NNDriver to new API.
* Updated Ref.
* Neon and Cl backend partially completed (Backend.cpp files).
* Added dynamic or constant input EndToEnd tests.
* Added ConstantTensorAsInputMemberVariableRedirect Optimization.
Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: Ib18b6c10a093042e165e25237dc04a4c67ba82da
Diffstat (limited to 'src/armnn/optimizations/FoldPadIntoLayer2d.hpp')
-rw-r--r-- | src/armnn/optimizations/FoldPadIntoLayer2d.hpp | 23 |
1 file changed, 14 insertions, 9 deletions
diff --git a/src/armnn/optimizations/FoldPadIntoLayer2d.hpp b/src/armnn/optimizations/FoldPadIntoLayer2d.hpp index 87117debe9..bbaabb815e 100644 --- a/src/armnn/optimizations/FoldPadIntoLayer2d.hpp +++ b/src/armnn/optimizations/FoldPadIntoLayer2d.hpp @@ -191,21 +191,26 @@ class FoldPadIntoDepthwiseConvolution2dImpl public: void Run(Graph& graph, InputSlot& connection) const { - const auto newConv2dLayer = FoldPadIntoLayer2dImpl<DepthwiseConvolution2dLayer>(graph, connection); + const auto newLayer2d = FoldPadIntoLayer2dImpl<DepthwiseConvolution2dLayer>(graph, connection); - if (newConv2dLayer != nullptr) + if (newLayer2d != nullptr) { - const auto conv2dLayer = PolymorphicDowncast<DepthwiseConvolution2dLayer*>(&connection.GetOwningLayer()); - // Copy weights and bias to the new convolution layer - ARMNN_ASSERT_MSG(conv2dLayer->m_Weight != nullptr, + const auto layer2d = PolymorphicDowncast<DepthwiseConvolution2dLayer*>(&connection.GetOwningLayer()); + + // Move weights and bias layer connections to the new convolution layer + ARMNN_ASSERT_MSG(layer2d->GetInputSlot(1).GetConnection() != nullptr, "FoldPadIntoDepthwiseConvolution2d: Weights data should not be null."); - newConv2dLayer->m_Weight = std::move(conv2dLayer->m_Weight); + Layer& weightLayer = layer2d->GetInputSlot(1).GetConnectedOutputSlot()->GetOwningLayer(); + weightLayer.GetOutputSlot(0).Disconnect(layer2d->GetInputSlot(1)); + weightLayer.GetOutputSlot(0).Connect(newLayer2d->GetInputSlot(1)); - if (conv2dLayer->GetParameters().m_BiasEnabled) + if (layer2d->GetParameters().m_BiasEnabled) { - ARMNN_ASSERT_MSG(conv2dLayer->m_Bias != nullptr, + ARMNN_ASSERT_MSG(layer2d->GetInputSlot(2).GetConnection() != nullptr, "FoldPadIntoDepthwiseConvolution2d: Bias data should not be null if bias is enabled."); - newConv2dLayer->m_Bias = std::move(conv2dLayer->m_Bias); + Layer& biasLayer = layer2d->GetInputSlot(2).GetConnectedOutputSlot()->GetOwningLayer(); + 
biasLayer.GetOutputSlot(0).Disconnect(layer2d->GetInputSlot(2)); + biasLayer.GetOutputSlot(0).Connect(newLayer2d->GetInputSlot(2)); } } } |