author     Jim Flynn <jim.flynn@arm.com>    2021-03-23 14:20:19 +0000
committer  Jim Flynn <jim.flynn@arm.com>    2021-03-23 16:20:37 +0000
commit     381f5029c643a3c7c2bfa3454a6673c9dedf2fd0 (patch)
tree       6f996bddaf6e3b97f8bc2e28c04328609b08e766 /src
parent     c74b1750fe8cf7affdbc59edd53357e0ea4efa53 (diff)
download   armnn-381f5029c643a3c7c2bfa3454a6673c9dedf2fd0.tar.gz
Revert "Fold PAD into Pooling2d"
This reverts commit 51ce7d487c761358de105f82ff90553570aedac0.

Reason for revert: https://jira.arm.com/browse/IVGCVSW-5798
LargeGraph_TENSOR_FLOAT32 CTS tests failures

Change-Id: Ib031a47f605340b2202ecf074ce96a8b54c51075
Diffstat (limited to 'src')
-rw-r--r--  src/armnn/Network.cpp                                   1
-rw-r--r--  src/armnn/optimizations/All.hpp                         2
-rw-r--r--  src/armnn/optimizations/FoldPadIntoConvolution2d.hpp   93
-rw-r--r--  src/armnn/optimizations/FoldPadIntoLayer2d.hpp        117
-rw-r--r--  src/armnn/test/OptimizerTests.cpp                      85
5 files changed, 95 insertions, 203 deletions
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 8dad3bba56..9373a6ac15 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1544,7 +1544,6 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
TransposeAsReshape(),
OptimizeConsecutiveReshapes(),
FoldPadIntoConvolution2d(),
- FoldPadIntoPooling2d(),
PermuteAndBatchToSpaceAsDepthToSpace(),
TransposeAndBatchToSpaceAsDepthToSpace(),
FuseBatchNormIntoConvolution2DFloat32(),
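The hunk above removes FoldPadIntoPooling2d() from the default pass list assembled inside Optimize(); the surviving FoldPadIntoConvolution2d() pass can still be applied to a Graph on its own, which is how the unit test changes further down drive these passes. A minimal sketch of that pattern follows; the include paths are an assumption and mirror how the internal src/armnn headers are used by the tests.

// Sketch only: run a single optimization pass over an armnn Graph, using the
// same Optimizer::Pass + MakeOptimizations pattern as Network.cpp above and
// OptimizerTests.cpp below. Include paths are an assumption (internal headers).
#include <Graph.hpp>
#include <Optimizer.hpp>
#include <optimizations/All.hpp>

void FoldPadsIntoConvolutions(armnn::Graph& graph)
{
    using namespace armnn;
    // Applies the remaining fold-pad pass in isolation; Optimizer::Pass walks
    // the graph and rewrites every Pad -> Convolution2d connection it finds.
    Optimizer::Pass(graph, MakeOptimizations(optimizations::FoldPadIntoConvolution2d()));
}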
diff --git a/src/armnn/optimizations/All.hpp b/src/armnn/optimizations/All.hpp
index 5decc7c969..d042616ba4 100644
--- a/src/armnn/optimizations/All.hpp
+++ b/src/armnn/optimizations/All.hpp
@@ -9,7 +9,7 @@
#include "ConvertConstants.hpp"
#include "ConvertFp32NetworkToBf16.hpp"
#include "ConvertFp32NetworkToFp16.hpp"
-#include "FoldPadIntoLayer2d.hpp"
+#include "FoldPadIntoConvolution2d.hpp"
#include "FuseBatchNorm.hpp"
#include "MovePermuteUp.hpp"
#include "MoveTransposeUp.hpp"
diff --git a/src/armnn/optimizations/FoldPadIntoConvolution2d.hpp b/src/armnn/optimizations/FoldPadIntoConvolution2d.hpp
new file mode 100644
index 0000000000..5def6dfdd2
--- /dev/null
+++ b/src/armnn/optimizations/FoldPadIntoConvolution2d.hpp
@@ -0,0 +1,93 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "Optimization.hpp"
+
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
+namespace armnn
+{
+namespace optimizations
+{
+
+class FoldPadIntoConvolution2dImpl
+{
+public:
+
+    void Run(Graph& graph, InputSlot& connection) const
+    {
+        Layer& base = connection.GetConnectedOutputSlot()->GetOwningLayer();
+        Layer& child = connection.GetOwningLayer();
+
+        ARMNN_ASSERT(base.GetType() == LayerType::Pad);
+        ARMNN_ASSERT(child.GetType() == LayerType::Convolution2d);
+
+        PadLayer* padLayer = PolymorphicDowncast<PadLayer*>(&base);
+        Convolution2dLayer* convolution2dLayer = PolymorphicDowncast<Convolution2dLayer*>(&child);
+
+        OutputSlot* parentOut = base.GetInputSlot(0).GetConnectedOutputSlot();
+
+        const std::string name = std::string("folded-") + base.GetName() + std::string("-into-") + child.GetName();
+        Convolution2dDescriptor descriptor = convolution2dLayer->GetParameters();
+
+        auto padList = padLayer->GetParameters().m_PadList;
+
+        armnn::DataLayout dataLayout = descriptor.m_DataLayout;
+
+        // In Convolution2dDescriptor, padLeft and padRight are defined as paddings on width dimension
+        // whereas padTop and padBottom - paddings on height dimension, so setting these according to data layout
+        if(dataLayout == armnn::DataLayout::NHWC)
+        {
+            descriptor.m_PadLeft = padList[2].first;
+            descriptor.m_PadRight = padList[2].second;
+            descriptor.m_PadTop = padList[1].first;
+            descriptor.m_PadBottom = padList[1].second;
+        }
+        else
+        {
+            descriptor.m_PadLeft = padList[3].first;
+            descriptor.m_PadRight = padList[3].second;
+            descriptor.m_PadTop = padList[2].first;
+            descriptor.m_PadBottom = padList[2].second;
+        }
+
+        auto& newConv2dLayer = *graph.InsertNewLayer<Convolution2dLayer>(base.GetInputSlot(0),
+                                                                         descriptor,
+                                                                         name.c_str());
+
+        // Copy weights and bias to the new convolution layer
+        ARMNN_ASSERT_MSG(convolution2dLayer->m_Weight != nullptr,
+                         "FoldPadIntoConvolution2d: Weights data should not be null.");
+        newConv2dLayer.m_Weight = std::move(convolution2dLayer->m_Weight);
+        if (descriptor.m_BiasEnabled)
+        {
+            ARMNN_ASSERT_MSG(convolution2dLayer->m_Bias != nullptr,
+                             "FoldPadIntoConvolution2d: Bias data should not be null if bias is enabled.");
+            newConv2dLayer.m_Bias = std::move(convolution2dLayer->m_Bias);
+        }
+
+        // Reconnects with original parent.
+        newConv2dLayer.GetOutputSlot().MoveAllConnections(*parentOut);
+        // Parent is now the new convolution2d layer.
+        parentOut = &newConv2dLayer.GetOutputSlot();
+
+        // Moves connections in child output to parent layer.
+        // Child layer will be removed as it's left unconnected.
+        // Base layer will be removed if left unconnected.
+        child.GetOutputSlot().MoveAllConnections(*parentOut);
+    }
+protected:
+    FoldPadIntoConvolution2dImpl() = default;
+    ~FoldPadIntoConvolution2dImpl() = default;
+};
+
+using FoldPadIntoConvolution2d = OptimizeForConnection<PadLayer, Convolution2dLayer, FoldPadIntoConvolution2dImpl>;
+
+} // namespace optimizations
+} // namespace armnn
+
+
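The index arithmetic in Run() above is worth spelling out: PadDescriptor::m_PadList carries one (before, after) pair per input dimension in tensor order, so the height/width pairs sit at indices 1/2 for NHWC and 2/3 for NCHW. Below is a small standalone sketch of that mapping; the PadList alias and Padding2d struct are stand-ins for illustration, not armnn types.

// Standalone sketch of the pad-list-to-descriptor mapping used by
// FoldPadIntoConvolution2dImpl::Run(). Padding2d and PadList are stand-ins.
#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

enum class DataLayout { NCHW, NHWC };
using PadList = std::vector<std::pair<uint32_t, uint32_t>>; // (before, after) per dimension

struct Padding2d
{
    uint32_t left = 0, right = 0, top = 0, bottom = 0;
};

// Width padding becomes left/right, height padding becomes top/bottom,
// with the H/W dimension indices chosen according to the data layout.
Padding2d FoldPadList(const PadList& padList, DataLayout layout)
{
    Padding2d p;
    const std::size_t h = (layout == DataLayout::NHWC) ? 1 : 2; // height dimension index
    const std::size_t w = (layout == DataLayout::NHWC) ? 2 : 3; // width dimension index
    p.top    = padList[h].first;
    p.bottom = padList[h].second;
    p.left   = padList[w].first;
    p.right  = padList[w].second;
    return p;
}

int main()
{
    // Same pad list as the unit test below: pad H and W by 1 on each side (NHWC).
    const PadList padList = {{0, 0}, {1, 1}, {1, 1}, {0, 0}};
    const Padding2d p = FoldPadList(padList, DataLayout::NHWC);
    std::cout << p.left << " " << p.right << " " << p.top << " " << p.bottom << "\n"; // prints: 1 1 1 1
    return 0;
}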
diff --git a/src/armnn/optimizations/FoldPadIntoLayer2d.hpp b/src/armnn/optimizations/FoldPadIntoLayer2d.hpp
deleted file mode 100644
index cadc2f3017..0000000000
--- a/src/armnn/optimizations/FoldPadIntoLayer2d.hpp
+++ /dev/null
@@ -1,117 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "Optimization.hpp"
-
-#include <armnn/utility/PolymorphicDowncast.hpp>
-
-namespace armnn
-{
-namespace optimizations
-{
-namespace
-{
-template <typename Layer2dT>
-Layer2dT* FoldPadIntoLayer2dImpl(Graph& graph, InputSlot& connection)
-{
-    Layer& base = connection.GetConnectedOutputSlot()->GetOwningLayer();
-    Layer& child = connection.GetOwningLayer();
-
-    ARMNN_ASSERT(base.GetType() == LayerType::Pad);
-    ARMNN_ASSERT(child.GetType() == LayerEnumOf<Layer2dT>());
-
-    PadLayer* padLayer = PolymorphicDowncast<PadLayer*>(&base);
-    Layer2dT* layer2d = PolymorphicDowncast<Layer2dT*>(&child);
-
-    OutputSlot* parentOut = base.GetInputSlot(0).GetConnectedOutputSlot();
-
-    const std::string name = std::string("folded-") + base.GetName() + std::string("-into-") + child.GetName();
-    auto descriptor = layer2d->GetParameters();
-
-    auto padList = padLayer->GetParameters().m_PadList;
-
-    armnn::DataLayout dataLayout = descriptor.m_DataLayout;
-
-    // In Convolution2dDescriptor/Pooling2dDescriptor, padLeft and padRight are defined as paddings
-    // on width dimension whereas padTop and padBottom - paddings on height dimension, so setting these
-    // according to data layout
-    if(dataLayout == armnn::DataLayout::NHWC)
-    {
-        descriptor.m_PadLeft = padList[2].first;
-        descriptor.m_PadRight = padList[2].second;
-        descriptor.m_PadTop = padList[1].first;
-        descriptor.m_PadBottom = padList[1].second;
-    }
-    else
-    {
-        descriptor.m_PadLeft = padList[3].first;
-        descriptor.m_PadRight = padList[3].second;
-        descriptor.m_PadTop = padList[2].first;
-        descriptor.m_PadBottom = padList[2].second;
-    }
-
-    const auto newLayer2d = graph.InsertNewLayer<Layer2dT>(base.GetInputSlot(0), descriptor, name.c_str());
-
-    // Reconnects with original parent.
-    newLayer2d->GetOutputSlot().MoveAllConnections(*parentOut);
-    // Parent is now the new layer.
-    parentOut = &newLayer2d->GetOutputSlot();
-
-    // Moves connections in child output to parent layer.
-    // Child layer will be removed as it's left unconnected.
-    // Base layer will be removed if left unconnected.
-    child.GetOutputSlot().MoveAllConnections(*parentOut);
-
-    return newLayer2d;
-}
-} // namespace
-
-class FoldPadIntoConvolution2dImpl
-{
-public:
-    void Run(Graph& graph, InputSlot& connection) const
-    {
-        const auto conv2dLayer = PolymorphicDowncast<Convolution2dLayer*>(&connection.GetOwningLayer());
-        const auto newConv2dLayer = FoldPadIntoLayer2dImpl<Convolution2dLayer>(graph, connection);
-
-        // Copy weights and bias to the new convolution layer
-        ARMNN_ASSERT_MSG(conv2dLayer->m_Weight != nullptr,
-                         "FoldPadIntoConvolution2d: Weights data should not be null.");
-        newConv2dLayer->m_Weight = std::move(conv2dLayer->m_Weight);
-        if (conv2dLayer->GetParameters().m_BiasEnabled)
-        {
-            ARMNN_ASSERT_MSG(conv2dLayer->m_Bias != nullptr,
-                             "FoldPadIntoConvolution2d: Bias data should not be null if bias is enabled.");
-            newConv2dLayer->m_Bias = std::move(conv2dLayer->m_Bias);
-        }
-    }
-
-protected:
-    FoldPadIntoConvolution2dImpl() = default;
-    ~FoldPadIntoConvolution2dImpl() = default;
-};
-
-class FoldPadIntoPooling2dImpl
-{
-public:
-    void Run(Graph& graph, InputSlot& connection) const
-    {
-        FoldPadIntoLayer2dImpl<Pooling2dLayer>(graph, connection);
-    }
-
-protected:
-    FoldPadIntoPooling2dImpl() = default;
-    ~FoldPadIntoPooling2dImpl() = default;
-};
-
-using FoldPadIntoConvolution2d = OptimizeForConnection<PadLayer, Convolution2dLayer, FoldPadIntoConvolution2dImpl>;
-using FoldPadIntoPooling2d = OptimizeForConnection<PadLayer, Pooling2dLayer, FoldPadIntoPooling2dImpl>;
-
-} // namespace optimizations
-} // namespace armnn
-
-
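For contrast with the file added above, the header deleted here had hoisted the shared body into the FoldPadIntoLayer2dImpl<Layer2dT> function template, which can serve both layer types only because Convolution2dDescriptor and Pooling2dDescriptor expose identical m_Pad* and m_DataLayout members. A minimal standalone illustration of that duck-typed reuse follows; the toy descriptor structs are assumptions for the sketch, not the armnn descriptors.

// Standalone sketch: one template serves any descriptor type that exposes the
// same padding/layout members, mirroring the deleted FoldPadIntoLayer2dImpl.
#include <cstdint>
#include <utility>
#include <vector>

enum class DataLayout { NCHW, NHWC };
using PadList = std::vector<std::pair<uint32_t, uint32_t>>;

// Toy descriptors (not armnn's) sharing the member names the template relies on.
struct Conv2dDesc { uint32_t m_PadLeft = 0, m_PadRight = 0, m_PadTop = 0, m_PadBottom = 0;
                    DataLayout m_DataLayout = DataLayout::NCHW; };
struct Pool2dDesc { uint32_t m_PadLeft = 0, m_PadRight = 0, m_PadTop = 0, m_PadBottom = 0;
                    DataLayout m_DataLayout = DataLayout::NCHW; };

template <typename DescriptorT>
void ApplyPadList(DescriptorT& desc, const PadList& padList)
{
    // Same index arithmetic as the deleted template: pick H/W indices by layout.
    const std::size_t h = (desc.m_DataLayout == DataLayout::NHWC) ? 1 : 2;
    const std::size_t w = (desc.m_DataLayout == DataLayout::NHWC) ? 2 : 3;
    desc.m_PadTop    = padList[h].first;
    desc.m_PadBottom = padList[h].second;
    desc.m_PadLeft   = padList[w].first;
    desc.m_PadRight  = padList[w].second;
}

int main()
{
    const PadList padList = {{0, 0}, {1, 1}, {1, 1}, {0, 0}};
    Conv2dDesc conv; conv.m_DataLayout = DataLayout::NHWC;
    Pool2dDesc pool; pool.m_DataLayout = DataLayout::NHWC;
    ApplyPadList(conv, padList); // the same call compiles for either descriptor
    ApplyPadList(pool, padList);
    return 0;
}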
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index f0d132a561..fa860abb64 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -624,89 +624,6 @@ BOOST_AUTO_TEST_CASE(FoldPadLayerIntoConvolution2dLayer)
&IsLayerOfType<armnn::OutputLayer>));
}
-BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer)
-{
-    Graph graph;
-    const unsigned int inputShape[] = { 1, 2, 2, 3 };
-    const unsigned int paddedShape[] = { 1, 3, 3, 3 };
-    const unsigned int outputShape[] = { 1, 2, 2, 3 };
-
-    armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
-    armnn::TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
-    armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
-
-    Layer* input = graph.AddLayer<InputLayer>(0, "input");
-    input->GetOutputSlot().SetTensorInfo(inputInfo);
-
-    PadDescriptor padDescriptor({{ 0, 0 }, { 1, 1 }, { 1, 1 }, { 0, 0 }});
-
-    PadLayer* padLayer = graph.AddLayer<PadLayer>(padDescriptor, "pad");
-    padLayer->GetOutputSlot().SetTensorInfo(paddedInfo);
-
-    Pooling2dDescriptor pooling2dDescriptor;
-    pooling2dDescriptor.m_PoolWidth = 3;
-    pooling2dDescriptor.m_PoolHeight = 3;
-    pooling2dDescriptor.m_StrideX = 1;
-    pooling2dDescriptor.m_StrideY = 1;
-    pooling2dDescriptor.m_DataLayout = DataLayout::NHWC;
-
-    Pooling2dLayer* pool2dLayer = graph.AddLayer<Pooling2dLayer>(pooling2dDescriptor, "pool2d");
-    pool2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);
-
-    Layer* output = graph.AddLayer<OutputLayer>(0, "output");
-
-    // Connect up layers - input -> pad -> pool2d -> output
-    input->GetOutputSlot().Connect(padLayer->GetInputSlot(0));
-    padLayer->GetOutputSlot().Connect(pool2dLayer->GetInputSlot(0));
-    pool2dLayer->GetOutputSlot().Connect(output->GetInputSlot(0));
-
-    auto checkSimplePool2d = [&](const armnn::Layer* const layer)
-    {
-        const auto pool2dLayer = static_cast<const armnn::Pooling2dLayer*>(layer);
-        return IsLayerOfType<armnn::Pooling2dLayer>(layer) &&
-               (layer->GetNameStr() == "pool2d") &&
-               (pool2dLayer->GetParameters() == pooling2dDescriptor);
-    };
-
-    BOOST_TEST(CheckSequence(graph.cbegin(),
-                             graph.cend(),
-                             &IsLayerOfType<armnn::InputLayer>,
-                             &IsLayerOfType<armnn::PadLayer>,
-                             checkSimplePool2d,
-                             &IsLayerOfType<armnn::OutputLayer>));
-
-    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(FoldPadIntoPooling2d()));
-
-    auto checkPadFoldedIntoPool2d = [&](const armnn::Layer* const layer)
-    {
-        if (!IsLayerOfType<armnn::Pooling2dLayer>(layer) || (layer->GetNameStr() != "folded-pad-into-pool2d"))
-        {
-            return false;
-        }
-
-        const auto pool2dLayer = static_cast<const armnn::Pooling2dLayer*>(layer);
-        const Pooling2dDescriptor pool2dLayerParams = pool2dLayer->GetParameters();
-
-        Pooling2dDescriptor pool2dLayerParamsNoPad = pool2dLayerParams;
-        pool2dLayerParamsNoPad.m_PadLeft = 0;
-        pool2dLayerParamsNoPad.m_PadRight = 0;
-        pool2dLayerParamsNoPad.m_PadTop = 0;
-        pool2dLayerParamsNoPad.m_PadBottom = 0;
-
-        return (pool2dLayerParamsNoPad == pooling2dDescriptor) &&
-               (pool2dLayerParams.m_PadLeft == 1) &&
-               (pool2dLayerParams.m_PadRight == 1) &&
-               (pool2dLayerParams.m_PadTop == 1) &&
-               (pool2dLayerParams.m_PadBottom == 1);
-    };
-
-    BOOST_TEST(CheckSequence(graph.cbegin(),
-                             graph.cend(),
-                             &IsLayerOfType<armnn::InputLayer>,
-                             checkPadFoldedIntoPool2d,
-                             &IsLayerOfType<armnn::OutputLayer>));
-}
-
class MockLayerSupport : public LayerSupportBase {
public:
bool IsInputSupported(const TensorInfo& /*input*/,
@@ -995,4 +912,4 @@ BOOST_AUTO_TEST_CASE(OptimizeForExclusiveConnectionsWithoutFuseTest)
&IsLayerOfType<armnn::OutputLayer>,
&IsLayerOfType<armnn::OutputLayer>));
}
-BOOST_AUTO_TEST_SUITE_END()
+BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
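As a sanity check on the shapes in the deleted FoldPadLayerIntoPooling2dLayer test: once the padding of 1 on each side of H and W is folded into the 3x3, stride-1 pooling descriptor, the spatial output over the 1x2x2x3 NHWC input is (2 + 1 + 1 - 3) / 1 + 1 = 2, which matches the expected outputShape of { 1, 2, 2, 3 }. A small worked sketch of that arithmetic follows; PooledExtent is an illustrative helper, not an armnn function.

// Worked sketch of the pooled output size with the folded padding from the
// deleted test. PooledExtent is illustrative only.
#include <cassert>
#include <cstdint>

// Pooled extent of one spatial dimension with floor rounding:
// floor((in + padBefore + padAfter - pool) / stride) + 1.
uint32_t PooledExtent(uint32_t in, uint32_t padBefore, uint32_t padAfter,
                      uint32_t pool, uint32_t stride)
{
    return (in + padBefore + padAfter - pool) / stride + 1;
}

int main()
{
    // Values from the deleted test: 2x2 spatial input, 3x3 pool, stride 1,
    // and padding of 1 on each side of H and W after folding.
    const uint32_t outH = PooledExtent(2, 1, 1, 3, 1);
    const uint32_t outW = PooledExtent(2, 1, 1, 3, 1);
    assert(outH == 2 && outW == 2); // matches outputShape = { 1, 2, 2, 3 } (NHWC)
    return 0;
}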